# Load polygons for aggregate regions based on trawl survey strata
regions <- read_sf(here::here("Data/raw", "nmfs_trawl_regions_collection.geojson"))

# Helper: dissolve all strata belonging to one named area into a single polygon.
# (The same filter -> st_union -> st_as_sf pipeline was previously repeated
# verbatim for each of the four areas.)
union_area <- function(area_name) {
  regions %>%
    filter(area == area_name) %>%
    st_union() %>%
    st_as_sf()
}

# Prepare the regions as single polygons:
gb  <- union_area("Georges Bank")
gom <- union_area("Gulf of Maine")
mab <- union_area("Mid-Atlantic Bight")
sne <- union_area("Southern New England")
Code
# Month weighting so we don't have to go back to daily data when averaging:
# each month's mean is weighted by how many days it contains.
seas_month_wts <- data.frame(
  "month"  = c("03", "04", "05", "09", "10", "11"),
  "n_days" = days_in_month(as.Date(c(
    "2000-03-01", "2000-04-01", "2000-05-01",
    "2000-09-01", "2000-10-01", "2000-11-01"
  )))
)

# Temperature columns that get the month-length-weighted average treatment
temp_cols <- c("sst", "clim_sst", "sst_anom", "bot_temp", "clim_bt", "bt_anom")

# Calculate average for the season
seasonal_temps <- regional_clim %>%
  # Explicit join key avoids the "Joining with `by = ...`" guess/message
  left_join(seas_month_wts, by = "month") %>%
  mutate(
    # case_when replaces the original pair of stacked ifelse() calls;
    # unmatched months fall through to NA and are dropped below
    season = case_when(
      month %in% str_pad(3:5,  side = "left", pad = "0", width = 2) ~ "Spring",
      month %in% str_pad(9:11, side = "left", pad = "0", width = 2) ~ "Fall"
    ),
    survey_area = factor(survey_area, levels = area_levels_long)
  ) %>%
  drop_na(season) %>%
  group_by(survey_area, year, season) %>%
  summarise(
    # One across() instead of six copy-pasted weighted.mean() lines
    across(all_of(temp_cols), ~ weighted.mean(.x, w = n_days)),
    .groups = "drop"
  )

# Calculate annual averages
annual_temps <- regional_clim %>%
  mutate(n_days = days_in_month(date)) %>%
  group_by(survey_area, year) %>%
  summarise(
    across(all_of(temp_cols), ~ weighted.mean(.x, w = n_days)),
    .groups = "drop"
  )
# Monthly surface and bottom temperature anomaly time series, one panel per area
ggplot(regional_clim) +
  geom_line(aes(date, sst_anom, color = "Surface Temperature Anomalies")) +
  geom_line(aes(date, bt_anom, color = "Bottom Temperature Anomalies")) +
  scale_color_gmri(reverse = TRUE) +  # TRUE, not T: T/F are reassignable
  facet_wrap(~survey_area, ncol = 1) +
  labs(y = "Temperature Anomaly", color = "Color")
Code
# Make these seasonal as if we were matching it to seasonal trawl data.
# Reshape the two anomaly columns into long form, then plot points with a
# 5-year simple-moving-average overlay, faceted by variable and season.
seasonal_temps %>%
  rename(
    `Bottom Temperature`  = bt_anom,
    `Surface Temperature` = sst_anom
  ) %>%
  pivot_longer(
    cols      = ends_with("Temperature"),
    names_to  = "var",
    values_to = "vals"
  ) %>%
  ggplot(aes(year, vals, color = survey_area)) +
  geom_point(alpha = 0.35, size = 0.5) +
  geom_ma(aes(linetype = "5-Year Moving Average"), n = 5, ma_fun = SMA) +
  facet_grid(fct_rev(var) ~ fct_rev(season)) +
  scale_color_gmri() +
  labs(
    y        = "Temperature Anomaly\n(1991-2020 Baseline)",
    linetype = "",
    color    = "Area"
  )
Long-Term Trends and Net Change
These two datasets span different ranges of years. Bottom temperature trends are computed from 1970 onward, because no other data in this project predate 1970, so everything is already framed from 1970 onward. Surface temperature trends are computed from 1982 onward, the first full year of data.
Where a significant first-order (linear) trend exists, net change over that time span is calculated as rate × number of years.
I'll also focus on seasonal averages to match the timing of the trawl survey.
Annual Trends
Code
# Run the bottom temperature trends on annual summaries.
# Bottom temperature window: 1970-2019 (matches the rest of the project).
start_yr <- 1970
end_yr <- 2019

annual_bottom_trends <- annual_temps %>%
  filter(between(year, start_yr, end_yr)) %>%
  split(.$survey_area) %>%
  map_dfr(function(.x) {
    # Simple first-order linear model of bottom temperature over time
    annual_mod <- lm(bot_temp ~ year, data = .x)
    mod_summ <- summary(annual_mod)[["coefficients"]]

    # Summary info: is the year slope significant (p < 0.05), and its rate.
    # A comparison already yields TRUE/FALSE; no ifelse(..., T, F) needed.
    mod_trend <- mod_summ[2, 4] < 0.05
    mod_rate <- coef(annual_mod)[[2]]

    # Tidy little output dataframe
    data.frame(
      "season" = "Annual",
      "trend"  = mod_trend,
      "rate"   = mod_rate
    )
  }, .id = "survey_area") %>%
  mutate(
    var = "Bottom Temperature\n(1970-2019)",
    # Net change (rate * years) only reported where the trend is significant
    temp_delta = ifelse(trend, rate * (end_yr - start_yr), NA)
  )
Code
# Run the surface temperature trends on annual summaries.
# (Original comment said "bottom" — this block models sst.)
# Surface window starts 1982, the first full year of OISST data.
start_yr <- 1982
end_yr <- 2019

annual_surface_trends <- annual_temps %>%
  filter(between(year, start_yr, end_yr)) %>%
  split(.$survey_area) %>%
  map_dfr(function(.x) {
    # Linear model of surface temperature over time
    annual_mod <- lm(sst ~ year, data = .x)
    annual_summ <- summary(annual_mod)[["coefficients"]]

    # Summary info: significance of the year slope and the warming rate.
    # The comparison is already logical; ifelse(..., T, F) was redundant.
    mod_trend <- annual_summ[2, 4] < 0.05
    mod_rate <- coef(annual_mod)[[2]]

    # Tidy little output dataframe
    data.frame(
      "season" = "Annual",
      "trend"  = mod_trend,
      "rate"   = mod_rate
    )
  }, .id = "survey_area") %>%
  mutate(
    var = "Surface Temperature\n(1982-2019)",
    # Net change (rate * years) only reported where the trend is significant
    temp_delta = ifelse(trend, rate * (end_yr - start_yr), NA)
  )
Code
# Combine those jokers
annual_trends_summary <- bind_rows(annual_surface_trends, annual_bottom_trends) %>%
  mutate(survey_area = fct_relevel(survey_area, rev(area_levels_long)))

# Dumbbell plot of net temperature change by survey area
ggplot(annual_trends_summary) +
  geom_segment(
    aes(y = survey_area, yend = survey_area, x = 0, xend = temp_delta, color = var),
    position = position_dodge()
  ) +
  geom_point(aes(x = temp_delta, y = survey_area, color = var)) +
  geom_vline(xintercept = 0) +
  scale_color_gmri() +
  scale_x_continuous(labels = label_number(suffix = deg_c)) +
  facet_wrap(~var, ncol = 2) +
  theme(panel.grid.major.y = element_blank()) +
  labs(
    x = expression(Delta ~ "Temperature"),
    y = "",
    title = "Net Temperature Change",
    # Original had `fill = ""`, a no-op: no fill aesthetic is mapped.
    # The legend here is driven by color, so blank that title instead.
    color = ""
  )
Code
# Maybe this should be a table:
# gt table of warming rate and net change, grouped by temperature variable,
# with degree-symbol formatting applied to both numeric columns.
warming_table <- annual_trends_summary %>%
  select(-c(season, trend)) %>%
  rename(
    area              = survey_area,
    `Net Change`      = temp_delta,
    `Rate of Warming` = rate
  ) %>%
  group_by(var) %>%
  gt(rowname_col = "area") %>%
  gt::tab_header(title = "Regional Surface and Bottom Temperature Change") %>%
  fmt(
    columns = `Rate of Warming`,
    fns = function(x) str_c(signif(x, 2), deg_c, " / year")
  ) %>%
  fmt(
    columns = `Net Change`,
    fns = function(x) str_c(signif(x, 2), deg_c)
  )

warming_table
# Make some models using anomalies or temperatures
library(lmerTest)
library(performance)
conflicted::conflicts_prefer(lmerTest::lmer)

# Let's compare which one does better: two mixed models with identical
# structure, differing only in whether the rolling temperature enters as
# the raw value (roll_temp) or as its anomaly (roll_anom). Both include a
# random intercept for survey year.
actual_temps_model <- lmer(
  b ~ survey_area * season * scale(roll_temp) +
    survey_area * scale(log(total_weight_lb)) +
    (1 | est_year),
  data = wtb_model_df
)

anomalies_temps_model <- lmer(
  b ~ survey_area * season * scale(roll_anom) +
    survey_area * scale(log(total_weight_lb)) +
    (1 | est_year),
  data = wtb_model_df
)

# # Make it make sense
# summary(two_temps_model)

# Diagnostic plots for the raw-temperature model
check_model(actual_temps_model)